set_bit(vector,PSCB(vcpu,irr));
}
+/* Debug helper: dump irr state when a timer tick is about to be
+ * delivered before the domain's programmed itm has been reached. */
+void early_tick(VCPU *vcpu)
+{
+ UINT64 *p = &PSCB(vcpu,irr[3]);
+ printf("vcpu_check_pending: about to deliver early tick\n");
+ /* label matches what is actually printed: irr[3], not irr[0] */
+ printf("&irr[3]=%p, irr[3]=0x%lx\n",p,*p);
+}
+
#define IA64_TPR_MMI 0x10000
#define IA64_TPR_MIC 0x000f0
}
//printf("returned to caller\n");
+#if 0
+if (vector == (PSCB(vcpu,itv) & 0xff)) {
+ UINT64 now = ia64_get_itc();
+ UINT64 itm = PSCB(vcpu,domain_itm);
+ if (now < itm) early_tick(vcpu);
+
+}
+#endif
return vector;
}
return(!itv || !!(itv & 0x10000));
}
+/* Return TRUE iff the guest's timer interrupt vector is currently
+ * marked in-service in the insvc bitmap. */
+BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
+{
+ /* Only bits 0-7 of cr.itv are the vector number; higher bits
+  * (e.g. the mask bit, 0x10000) are control bits and would index
+  * far past the 256-bit insvc bitmap if left in. */
+ UINT64 itv = PSCB(vcpu,itv) & 0xff;
+ return (test_bit(itv, PSCB(vcpu,insvc)));
+}
+
BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
 unsigned long domain_itm = PSCB(vcpu,domain_itm);
 unsigned long now = ia64_get_itc();
- if (domain_itm && (now > domain_itm) &&
- !vcpu_timer_disabled(vcpu)) return TRUE;
- return FALSE;
+ // Expired iff: an itm is programmed, it has been reached, and the
+ // timer is not disabled.  NOTE(review): this rewrite also changes
+ // the boundary — now == domain_itm now counts as expired, where the
+ // old code required now > domain_itm; confirm this is intentional.
+ if (!domain_itm) return FALSE;
+ if (now < domain_itm) return FALSE;
+ if (vcpu_timer_disabled(vcpu)) return FALSE;
+ return TRUE;
}
void vcpu_safe_set_itm(unsigned long val)
UINT64 itv = PSCB(vcpu,itv) & 0xff;
if (vcpu_timer_disabled(vcpu)) return;
+ if (vcpu_timer_inservice(vcpu)) return;
#if 1
// attempt to flag "timer tick before its due" source
{
IA64FAULT vcpu_cover(VCPU *vcpu)
{
+ // TODO: Only allowed for current vcpu
REGS *regs = vcpu_regs(vcpu);
if (!PSCB(vcpu,interrupt_collection_enabled)) {
IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
+ // TODO: Only allowed for current vcpu
REGS *regs = vcpu_regs(vcpu);
unsigned long *r = ®s->r16;
unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
+ // TODO: Only allowed for current vcpu
REGS *regs = vcpu_regs(vcpu);
unsigned long *r = ®s->r16;
unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
// on the physical address, which is guaranteed to flush the same cache line
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
+ // TODO: Only allowed for current vcpu
UINT64 mpaddr, ps;
IA64FAULT fault;
unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);